p2m and m2p tables are now 4 bytes per entry always, even on x86/64.
Fixed mapping of m2p table into kernel space on x86/64.
Signed-off-by: Keir Fraser <keir.fraser@cl.cam.ac.uk>
*/
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);
}
#endif
- phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
+ phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
cur_pgd = init_mm.pgd = (pgd_t *)xen_start_info.pt_base;
start_pfn = (__pa(xen_start_info.pt_base) >> PAGE_SHIFT) +
{
phys_to_machine_mapping = alloc_bootmem_low_pages(
max_pfn * sizeof(unsigned long));
- memset(phys_to_machine_mapping, ~0, max_pfn * sizeof(unsigned long));
+ memset(phys_to_machine_mapping, ~0, max_pfn * sizeof(unsigned int));
memcpy(phys_to_machine_mapping,
(unsigned long *)xen_start_info.mfn_list,
xen_start_info.nr_pages * sizeof(unsigned long));
#define copy_user_page(to, from, vaddr) copy_page(to, from)
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+extern unsigned int *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
+#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
static inline unsigned long phys_to_machine(unsigned long phys)
{
unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
* require. In all the cases we care about, the high bit gets shifted out
* (e.g., phys_to_machine()) so behaviour there is correct.
*/
-#define INVALID_P2M_ENTRY (~0UL)
+#define INVALID_P2M_ENTRY (~0U)
#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_page(_pte) \
({ \
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);
-unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
EXPORT_SYMBOL(phys_to_machine_mapping);
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
}
#endif
- phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
+ phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
return max_low_pfn;
}
extern void time_suspend(void);
extern void time_resume(void);
extern unsigned long max_pfn;
- extern unsigned long *pfn_to_mfn_frame_list;
+ extern unsigned int *pfn_to_mfn_frame_list;
suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
if ( suspend_record == NULL )
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+extern unsigned int *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
+#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
static inline unsigned long phys_to_machine(unsigned long phys)
{
unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
* require. In all the cases we care about, the high bit gets shifted out
* (e.g., phys_to_machine()) so behaviour there is correct.
*/
-#define INVALID_P2M_ENTRY (~0UL)
+#define INVALID_P2M_ENTRY (~0U)
#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_pfn(_pte) \
({ \
ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ PAGE_SHIFT] = INVALID_M2P_ENTRY;
d->arch.mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ PAGE_SHIFT] = INVALID_M2P_ENTRY;
ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
#ifdef __x86_64__
struct switch_to_user stu;
struct exec_domain *ed = current;
- if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) )
+ if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) ||
+ unlikely(pagetable_val(ed->arch.pagetable_user) == 0) )
return -EFAULT;
ed->arch.flags &= ~TF_kernel_mode;
vinitrd_start = round_pgup(dsi.v_kernend);
vinitrd_end = vinitrd_start + initrd_len;
vphysmap_start = round_pgup(vinitrd_end);
- vphysmap_end = vphysmap_start + (nr_pages * sizeof(unsigned long));
+ vphysmap_end = vphysmap_start + (nr_pages * sizeof(u32));
vpt_start = round_pgup(vphysmap_end);
for ( nr_pt_pages = 2; ; nr_pt_pages++ )
{
if ( pfn > REVERSE_START )
mfn = (alloc_end>>PAGE_SHIFT) - (pfn - REVERSE_START);
#endif
- ((unsigned long *)vphysmap_start)[pfn] = mfn;
+ ((u32 *)vphysmap_start)[pfn] = mfn;
machine_to_phys_mapping[mfn] = pfn;
}
vinitrd_start = round_pgup(dsi.v_kernend);
vinitrd_end = vinitrd_start + initrd_len;
vphysmap_start = round_pgup(vinitrd_end);
- vphysmap_end = vphysmap_start + (nr_pages * sizeof(unsigned long));
+ vphysmap_end = vphysmap_start + (nr_pages * sizeof(u32));
vpt_start = round_pgup(vphysmap_end);
for ( nr_pt_pages = 2; ; nr_pt_pages++ )
{
if ( pfn > REVERSE_START )
mfn = (alloc_end>>PAGE_SHIFT) - (pfn - REVERSE_START);
#endif
- ((unsigned long *)vphysmap_start)[pfn] = mfn;
+ ((u32 *)vphysmap_start)[pfn] = mfn;
machine_to_phys_mapping[mfn] = pfn;
}
void __init paging_init(void)
{
- void *newpt;
unsigned long i, p, max;
+ l3_pgentry_t *l3rw, *l3ro;
/* Map all of physical memory. */
max = ((max_page + L1_PAGETABLE_ENTRIES - 1) &
if ( p == 0 )
panic("Not enough memory for m2p table\n");
map_pages(idle_pg_table, RDWR_MPT_VIRT_START + i*8, p,
- 1UL << L2_PAGETABLE_SHIFT, PAGE_HYPERVISOR);
+ 1UL << L2_PAGETABLE_SHIFT, PAGE_HYPERVISOR | _PAGE_USER);
memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
1UL << L2_PAGETABLE_SHIFT);
}
+ /*
+ * Above we mapped the M2P table as user-accessible and read-writable.
+ * Fix security by denying user access at the top level of the page table.
+ */
+ idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)] =
+ mk_l4_pgentry(l4_pgentry_val(
+ idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]) &
+ ~_PAGE_USER);
+
/* Create read-only mapping of MPT for guest-OS use. */
- newpt = (void *)alloc_xenheap_page();
- clear_page(newpt);
+ l3ro = (l3_pgentry_t *)alloc_xenheap_page();
+ clear_page(l3ro);
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
- mk_l4_pgentry((__pa(newpt) | __PAGE_HYPERVISOR | _PAGE_USER) &
+ mk_l4_pgentry((__pa(l3ro) | __PAGE_HYPERVISOR | _PAGE_USER) &
~_PAGE_RW);
/* Copy the L3 mappings from the RDWR_MPT area. */
- p = l4_pgentry_val(idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]);
- p &= PAGE_MASK;
- p += l3_table_offset(RDWR_MPT_VIRT_START) * sizeof(l3_pgentry_t);
- newpt = (void *)((unsigned long)newpt +
- (l3_table_offset(RO_MPT_VIRT_START) *
- sizeof(l3_pgentry_t)));
- memcpy(newpt, __va(p),
+ l3rw = l4_pgentry_to_l3(
+ idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]);
+ l3rw += l3_table_offset(RDWR_MPT_VIRT_START);
+ l3ro += l3_table_offset(RO_MPT_VIRT_START);
+ memcpy(l3ro, l3rw,
(RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START) >> L3_PAGETABLE_SHIFT);
/* Set up linear page table mapping. */
spin_unlock(&(_dom)->page_alloc_lock); \
} while ( 0 )
-#define INVALID_P2M_ENTRY (~0UL)
-
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern unsigned long max_page;
* contiguous (or near contiguous) physical memory.
*/
#undef machine_to_phys_mapping
+#define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START)
+#define INVALID_M2P_ENTRY (~0U)
+#define IS_INVALID_M2P_ENTRY(_e) (!!((_e) & (1U<<31)))
/*
* The phys_to_machine_mapping is the reversed mapping of MPT for full
* virtualization.
*/
-#undef phys_to_machine_mapping
-
-#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#define __phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
+
/* Returns the machine physical */
static inline unsigned long phys_to_machine_mapping(unsigned long pfn)
{
* domain's pseudo-physical memory map (e.g., the shared info frame).
* Nothing to do here...
*/
- if ( unlikely(pfn & 0x80000000UL) )
+ if ( unlikely(IS_INVALID_M2P_ENTRY(pfn)) )
return rc;
if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
*/
#define HYPERVISOR_VIRT_START (0xFC000000UL)
#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
#ifndef __ASSEMBLY__
/* The machine->physical mapping table starts at this address, read-only. */
#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
/*